前言:
接下來就是建立和訓練模型。
程式碼:
方案一
# Scheme 1: train a small CNN from scratch.
# Three Conv2D+MaxPool2D stages feed a dense classifier head ending in a
# 10-way softmax; input is a 28x28 single-channel image.
# NOTE(review): assumes `train_batches` / `test_batches` are defined by the
# data-preparation code not shown here — confirm against the full script.
cnn_stack = [
    layers.Conv2D(16, kernel_size=(3, 3), activation='relu', input_shape=(28, 28, 1)),
    layers.MaxPool2D(pool_size=(2, 2)),
    layers.Conv2D(32, kernel_size=(3, 3), activation='relu'),
    layers.MaxPool2D(pool_size=(2, 2)),
    layers.Conv2D(64, kernel_size=(3, 3), activation='relu'),
    layers.MaxPool2D(pool_size=(2, 2)),
    layers.Flatten(),
    layers.Dense(512, activation='relu'),
    layers.Dense(10, activation='softmax'),
]
model = tf.keras.Sequential(cnn_stack)

# Sparse labels (integer class ids) -> sparse categorical crossentropy.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

EPOCH = 10  # number of training epochs
history = model.fit(train_batches,
                    validation_data=test_batches,
                    epochs=EPOCH)

# Persist the trained model in HDF5 format.
model.save('./keras_model.h5')
方案二
# Scheme 2: transfer learning — fine-tune a pretrained ResNet50.
# Build ResNet50 with ImageNet weights, discarding its top fully
# connected layers so we can attach our own classifier head.
net = ResNet50(include_top=False, weights='imagenet', input_tensor=None,
               input_shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3))
x = net.output
x = Flatten()(x)
# Dropout layer to reduce overfitting of the new head.
x = Dropout(0.5)(x)
# Dense layer with softmax to produce per-class probabilities.
output_layer = Dense(NUM_CLASSES, activation='softmax', name='softmax')(x)
# Freeze the first FREEZE_LAYERS layers; only the remaining layers train.
net_final = Model(inputs=net.input, outputs=output_layer)
for layer in net_final.layers[:FREEZE_LAYERS]:
    layer.trainable = False
for layer in net_final.layers[FREEZE_LAYERS:]:
    layer.trainable = True
# NOTE(review): the original comment asked for a low learning rate for
# fine-tuning, but the string 'adam' uses Keras' default (1e-3). For real
# fine-tuning, import Adam and pass optimizer=Adam(learning_rate=1e-5).
net_final.compile(optimizer='adam',
                  loss='categorical_crossentropy', metrics=['accuracy'])
# Print the full network structure.
print(net_final.summary())
# Train the model.
# BUG FIX: the original line was `net_final.fit_generator` — an attribute
# access that never CALLS the method, so the model was saved untrained.
# fit_generator is also deprecated; Model.fit accepts generators directly.
# `train_batches` / `valid_batches` / NUM_EPOCHS come from data-preparation
# code not shown here — TODO confirm the actual variable names.
net_final.fit(train_batches,
              validation_data=valid_batches,
              epochs=NUM_EPOCHS)
# Save the trained model weights/architecture to WEIGHTS_FINAL.
net_final.save(WEIGHTS_FINAL)